const runtime.pageSize
77 uses
runtime (current package)
arena.go#L196: userArenaChunkPages = userArenaChunkBytes / pageSize
arena.go#L206: if userArenaChunkPages*pageSize != userArenaChunkBytes {
arena.go#L871: if s.npages*pageSize != userArenaChunkBytes {
arena.go#L892: sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)
arena.go#L897: gcController.heapInUse.add(-int64(s.npages * pageSize))
arena.go#L910: atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
arena.go#L911: atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
arena.go#L952: if s.npages*pageSize != userArenaChunkBytes {
malloc.go#L118: pageSize = _PageSize
malloc.go#L258: pagesPerArena = heapArenaBytes / pageSize
mbitmap.go#L579: return heapBitsSlice(span.base(), pageSize)
mbitmap.go#L581: return heapBitsSlice(span.base(), span.npages*pageSize)
mbitmap.go#L602: spanSize := span.npages * pageSize
mbitmap.go#L726: npages := alignUp(spaceNeeded, pageSize) / pageSize
mbitmap.go#L730: memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
mbitmap.go#L1683: pages := divRoundUp(bitmapBytes, pageSize)
mcache.go#L212: gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
mcache.go#L241: atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
mcache.go#L246: gcController.totalAlloc.Add(int64(npages * pageSize))
mcache.go#L249: gcController.update(int64(s.npages*pageSize), 0)
mgcscavenge.go#L130: maxPagesPerPhysPage = maxPhysPageSize / pageSize
mgcscavenge.go#L739: maxPages := max / pageSize
mgcscavenge.go#L740: if max%pageSize != 0 {
mgcscavenge.go#L749: minPages := physPageSize / pageSize
mgcscavenge.go#L763: addr := chunkBase(ci) + uintptr(base)*pageSize
mgcscavenge.go#L778: sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
mgcscavenge.go#L782: nbytes := int64(npages * pageSize)
mgcscavenge.go#L805: return uintptr(npages) * pageSize
mgcscavenge.go#L962: if physHugePageSize > pageSize && physHugePageSize > physPageSize {
mgcscavenge.go#L969: pagesPerHugePage := physHugePageSize / pageSize
mgcscavenge.go#L1104: newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
mgcscavenge.go#L1153: addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize
mgcwork.go#L27: if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
mgcwork.go#L378: s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
mheap.go#L695: return ha.spans[(p/pageSize)%pagesPerArena]
mheap.go#L706: return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
mheap.go#L733: pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
mheap.go#L734: pageMask = byte(1 << ((p / pageSize) % 8))
mheap.go#L920: trace.GCSweepSpan((n0 - nFreed) * pageSize)
mheap.go#L994: p := base / pageSize
mheap.go#L1000: ai = arenaIndex(base + n*pageSize)
mheap.go#L1042: arenaLimit := arenaBase + npage*pageSize
mheap.go#L1065: npage -= (arenaLimit - arenaBase) / pageSize
mheap.go#L1178: needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
mheap.go#L1212: extraPages := physPageSize / pageSize
mheap.go#L1341: nbytes := npages * pageSize
mheap.go#L1389: nbytes := npages * pageSize
mheap.go#L1393: s.limit = s.base() + s.npages*pageSize
mheap.go#L1478: ask := alignUp(npage, pallocChunkPages) * pageSize
mheap.go#L1639: nbytes := s.npages * pageSize
mheap.go#L1838: arenaPage := (s.base() / pageSize) % pagesPerArena
mheap.go#L1846: arenaPage := (s.base() / pageSize) % pagesPerArena
mpagealloc.go#L59: pallocChunkBytes = pallocChunkPages * pageSize
mpagealloc.go#L118: return uint(p % pallocChunkBytes / pageSize)
mpagealloc.go#L428: p.update(base, size/pageSize, true, false)
mpagealloc.go#L489: limit := base + npages*pageSize - 1
mpagealloc.go#L573: limit := base + npages*pageSize - 1
mpagealloc.go#L602: return uintptr(scav) * pageSize
mpagealloc.go#L772: foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
mpagealloc.go#L810: addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
mpagealloc.go#L851: addr := chunkBase(ci) + uintptr(j)*pageSize
mpagealloc.go#L855: searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
mpagealloc.go#L894: addr = chunkBase(i) + uintptr(j)*pageSize
mpagealloc.go#L895: searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
mpagealloc.go#L940: limit := base + npages*pageSize - 1
mpagecache.go#L46: return c.base + i*pageSize, uintptr(scav) * pageSize
mpagecache.go#L66: return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
mpagecache.go#L138: base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
mpagecache.go#L155: base: alignDown(addr, 64*pageSize),
mpagecache.go#L181: p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
msize.go#L29: reqSize += pageSize - 1
msize.go#L33: return reqSize &^ (pageSize - 1)
traceallocfree.go#L39: w.varint(uint64(pageSize))
traceallocfree.go#L107: return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
The pages are generated with Golds v0.7.6. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PR and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.